case dest_LowestPrio:
{
target = vlapic_lowest_prio(d, NULL, 0, dest, dest_mode);
- if ( target != NULL )
+ if ( target != NULL ) {
vmsi_inj_irq(d, target, vector, trig_mode, delivery_mode);
+ hvm_irq_dpci->mirq[pirq].gmsi.old_gvec =
+ hvm_irq_dpci->mirq[pirq].gmsi.gvec;
+ }
else
HVM_DBG_LOG(DBG_LEVEL_IOAPIC, "null round robin: "
"vector=%x delivery_mode=%x\n",
{
for_each_vcpu ( d, v )
if ( vlapic_match_dest(vcpu_vlapic(v), NULL,
- 0, dest, dest_mode) )
+ 0, dest, dest_mode) ) {
vmsi_inj_irq(d, vcpu_vlapic(v),
vector, trig_mode, delivery_mode);
+ hvm_irq_dpci->mirq[pirq].gmsi.old_gvec =
+ hvm_irq_dpci->mirq[pirq].gmsi.gvec;
+ }
break;
}
{
hvm_irq_dpci->mirq[pirq].flags = HVM_IRQ_DPCI_MACH_MSI |
HVM_IRQ_DPCI_GUEST_MSI;
+ hvm_irq_dpci->mirq[pirq].gmsi.old_gvec = pt_irq_bind->u.msi.gvec;
hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
+ hvm_irq_dpci->mirq[pirq].gmsi.old_gflags = pt_irq_bind->u.msi.gflags;
hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
/* bind after hvm_irq_dpci is set up to avoid a race with the irq handler */
rc = pirq_guest_bind(d->vcpu[0], pirq, 0);
{
hvm_irq_dpci->mirq[pirq].gmsi.gflags = 0;
hvm_irq_dpci->mirq[pirq].gmsi.gvec = 0;
+ hvm_irq_dpci->mirq[pirq].gmsi.old_gvec = 0;
+ hvm_irq_dpci->mirq[pirq].gmsi.old_gflags = 0;
hvm_irq_dpci->mirq[pirq].flags = 0;
clear_bit(pirq, hvm_irq_dpci->mapping);
spin_unlock(&d->event_lock);
}
/* if pirq is already mapped as vmsi, update the guest data/addr */
- hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
- hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
+ if ( hvm_irq_dpci->mirq[pirq].gmsi.gvec != pt_irq_bind->u.msi.gvec ||
+      hvm_irq_dpci->mirq[pirq].gmsi.gflags != pt_irq_bind->u.msi.gflags ) {
+ hvm_irq_dpci->mirq[pirq].gmsi.old_gvec =
+ hvm_irq_dpci->mirq[pirq].gmsi.gvec;
+ hvm_irq_dpci->mirq[pirq].gmsi.old_gflags =
+ hvm_irq_dpci->mirq[pirq].gmsi.gflags;
+ hvm_irq_dpci->mirq[pirq].gmsi.gvec = pt_irq_bind->u.msi.gvec;
+ hvm_irq_dpci->mirq[pirq].gmsi.gflags = pt_irq_bind->u.msi.gflags;
+ }
}
/* Calculate dest_vcpu_id for MSI-type pirq migration */
dest = hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DEST_ID_MASK;
pirq = find_next_bit(hvm_irq_dpci->mapping, d->nr_pirqs, pirq + 1) )
{
if ( (!(hvm_irq_dpci->mirq[pirq].flags & HVM_IRQ_DPCI_MACH_MSI)) ||
- (hvm_irq_dpci->mirq[pirq].gmsi.gvec != vector) )
+ (hvm_irq_dpci->mirq[pirq].gmsi.gvec != vector &&
+ hvm_irq_dpci->mirq[pirq].gmsi.old_gvec != vector) )
continue;
- dest = hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DEST_ID_MASK;
- dest_mode = !!(hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DM_MASK);
+ if ( hvm_irq_dpci->mirq[pirq].gmsi.gvec == vector ) {
+ dest = hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DEST_ID_MASK;
+ dest_mode = !!(hvm_irq_dpci->mirq[pirq].gmsi.gflags & VMSI_DM_MASK);
+ } else {
+ dest = hvm_irq_dpci->mirq[pirq].gmsi.old_gflags & VMSI_DEST_ID_MASK;
+ dest_mode = !!(hvm_irq_dpci->mirq[pirq].gmsi.old_gflags & VMSI_DM_MASK);
+ }
if ( vlapic_match_dest(vcpu_vlapic(current), NULL, 0, dest, dest_mode) )
break;
}
+
if ( pirq < d->nr_pirqs )
__msi_pirq_eoi(d, pirq);
spin_unlock(&d->event_lock);
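
The hunks above write to gmsi.old_gvec and gmsi.old_gflags, but the change that introduces those members is not shown in this section. The following is only a minimal sketch of how struct hvm_gmsi_info would need to grow; the field types, the comments, and the placement (presumably alongside the existing gvec/gflags declarations, e.g. in xen/include/xen/hvm/irq.h) are assumptions, and the real structure may carry further members such as the dest_vcpu_id referred to in the comment above.

    /* Sketch only: exact types and surrounding members are not part of this diff. */
    struct hvm_gmsi_info {
        uint32_t gvec;       /* guest vector currently bound to this pirq */
        uint32_t gflags;     /* guest flags: destination id, destination mode, ... */
        uint32_t old_gvec;   /* previous vector, preserved across a vector migration */
        uint32_t old_gflags; /* previous flags, preserved across a vector migration */
    };

With the old values preserved, hvm_dpci_msi_eoi() can match a guest EOI against either the current or the previous vector, so an interrupt injected just before the guest reprogrammed its MSI data/address still has its EOI forwarded to the corresponding pirq.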